In [1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

%matplotlib inline

# Импортирование необходимых модулей и атрибутов
from sklearn import linear_model
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split, GridSearchCV, KFold, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import Normalizer
from matplotlib import pyplot
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_percentage_error, mean_absolute_error
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression, SGDRegressor
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
from keras.wrappers.scikit_learn import KerasRegressor
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf

from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Activation, Dense
from tensorflow.keras.layers import Dropout
In [2]:
df = pd.read_excel(r"C:\Users\55944\Desktop\888\data_iqr_2.xlsx")
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df.head(5)
Out[2]:
Соотношение матрица-наполнитель Плотность, кг/м3 модуль упругости, ГПа Количество отвердителя, м.% Содержание эпоксидных групп,%_2 Температура вспышки, С_2 Поверхностная плотность, г/м2 Модуль упругости при растяжении, ГПа Прочность при растяжении, МПа Потребление смолы, г/м2 Угол нашивки Шаг нашивки Плотность нашивки
0 1.857143 2030.0 738.736842 50.00 23.750000 284.615385 210.0 70.0 3000.0 220.0 0 4.0 60.0
1 1.857143 2030.0 738.736842 129.00 21.250000 300.000000 210.0 70.0 3000.0 220.0 0 5.0 47.0
2 2.771331 2030.0 753.000000 111.86 22.267857 284.615385 210.0 70.0 3000.0 220.0 0 5.0 57.0
3 2.767918 2000.0 748.000000 111.86 22.267857 284.615385 210.0 70.0 3000.0 220.0 0 5.0 60.0
4 2.569620 1910.0 807.000000 111.86 22.267857 284.615385 210.0 70.0 3000.0 220.0 0 5.0 70.0
In [3]:
columns = ['Соотношение матрица-наполнитель', 'Плотность, кг/м3',
       'модуль упругости, ГПа', 'Количество отвердителя, м.%',
       'Содержание эпоксидных групп,%_2', 'Температура вспышки, С_2',
       'Поверхностная плотность, г/м2', 'Модуль упругости при растяжении, ГПа','Потребление смолы', 'Угол нашивки',
       'Шаг нашивки', 'Плотность нашивки','Прочность при растяжении, МПа']
In [4]:
df.columns
Out[4]:
Index(['Соотношение матрица-наполнитель', 'Плотность, кг/м3',
       'модуль упругости, ГПа', 'Количество отвердителя, м.%',
       'Содержание эпоксидных групп,%_2', 'Температура вспышки, С_2',
       'Поверхностная плотность, г/м2', 'Модуль упругости при растяжении, ГПа',
       'Прочность при растяжении, МПа', 'Потребление смолы, г/м2',
       'Угол нашивки', 'Шаг нашивки', 'Плотность нашивки'],
      dtype='object')
In [5]:
x=df[[ 'Плотность, кг/м3',
       'модуль упругости, ГПа', 'Количество отвердителя, м.%',
       'Содержание эпоксидных групп,%_2', 'Температура вспышки, С_2',
       'Поверхностная плотность, г/м2', 'Модуль упругости при растяжении, ГПа',
       'Прочность при растяжении, МПа', 'Потребление смолы, г/м2',
       'Угол нашивки', 'Шаг нашивки', 'Плотность нашивки']]
# целевую переменную поместим в переменную y
y=df[['Соотношение матрица-наполнитель']]
In [6]:
x_train, x_test, y_train, y_test = train_test_split(x, y, 
                                                    test_size = 0.3, 
                                                    random_state = 42)

Построим простую линейную модель 1¶

In [7]:
normalizer = tf.keras.layers.Normalization(axis=-1)
In [8]:
normalizer.adapt(np.array(x))
In [9]:
model_1 = tf.keras.Sequential([
    normalizer,
    layers.Dense(units=1)
])
model_1.summary()
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense (Dense)               (None, 1)                 13        
                                                                 
=================================================================
Total params: 38
Trainable params: 13
Non-trainable params: 25
_________________________________________________________________
In [10]:
model_1.compile(
    optimizer=tf.optimizers.Adam(learning_rate=0.1),
    loss='mean_squared_error')
In [11]:
history = model_1.fit(
    x_train,
    y_train,
    epochs=100,
    verbose=1,
    validation_split=0.2)
Epoch 1/100
17/17 [==============================] - 3s 24ms/step - loss: 6.4692 - val_loss: 3.2980
Epoch 2/100
17/17 [==============================] - 0s 3ms/step - loss: 1.5264 - val_loss: 0.9627
Epoch 3/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8816 - val_loss: 0.9008
Epoch 4/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8692 - val_loss: 0.8337
Epoch 5/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8517 - val_loss: 0.9136
Epoch 6/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8561 - val_loss: 0.8651
Epoch 7/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8878 - val_loss: 0.8525
Epoch 8/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8808 - val_loss: 0.9731
Epoch 9/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8672 - val_loss: 0.8669
Epoch 10/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8665 - val_loss: 0.8806
Epoch 11/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8836 - val_loss: 0.9271
Epoch 12/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9072 - val_loss: 0.9124
Epoch 13/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9158 - val_loss: 0.8324
Epoch 14/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9768 - val_loss: 1.0937
Epoch 15/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9203 - val_loss: 0.8537
Epoch 16/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8839 - val_loss: 0.9084
Epoch 17/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8616 - val_loss: 0.9031
Epoch 18/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9097 - val_loss: 0.8383
Epoch 19/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8729 - val_loss: 0.9369
Epoch 20/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9163 - val_loss: 0.9169
Epoch 21/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9819 - val_loss: 0.8440
Epoch 22/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9076 - val_loss: 0.9978
Epoch 23/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9181 - val_loss: 0.8946
Epoch 24/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9206 - val_loss: 0.9092
Epoch 25/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8810 - val_loss: 0.9439
Epoch 26/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8687 - val_loss: 0.8468
Epoch 27/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8843 - val_loss: 0.8888
Epoch 28/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9231 - val_loss: 0.9191
Epoch 29/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8921 - val_loss: 0.8975
Epoch 30/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9199 - val_loss: 0.9331
Epoch 31/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8979 - val_loss: 0.9269
Epoch 32/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8993 - val_loss: 0.8845
Epoch 33/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8897 - val_loss: 0.8998
Epoch 34/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8609 - val_loss: 0.9103
Epoch 35/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8729 - val_loss: 0.9003
Epoch 36/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8703 - val_loss: 0.8878
Epoch 37/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8609 - val_loss: 0.9627
Epoch 38/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8889 - val_loss: 0.8886
Epoch 39/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9442 - val_loss: 0.9490
Epoch 40/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9145 - val_loss: 0.9103
Epoch 41/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9040 - val_loss: 0.8587
Epoch 42/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9278 - val_loss: 0.9363
Epoch 43/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9417 - val_loss: 0.9255
Epoch 44/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8774 - val_loss: 0.9184
Epoch 45/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9022 - val_loss: 1.0660
Epoch 46/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9337 - val_loss: 0.8471
Epoch 47/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9035 - val_loss: 0.9006
Epoch 48/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9401 - val_loss: 0.9710
Epoch 49/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8999 - val_loss: 0.8889
Epoch 50/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9096 - val_loss: 0.9095
Epoch 51/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9119 - val_loss: 0.9712
Epoch 52/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8601 - val_loss: 0.8966
Epoch 53/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8712 - val_loss: 0.9189
Epoch 54/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9144 - val_loss: 0.9368
Epoch 55/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9766 - val_loss: 0.9206
Epoch 56/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9941 - val_loss: 1.0292
Epoch 57/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9883 - val_loss: 0.8990
Epoch 58/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9238 - val_loss: 0.9165
Epoch 59/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9645 - val_loss: 0.9362
Epoch 60/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9058 - val_loss: 0.8973
Epoch 61/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9161 - val_loss: 0.9951
Epoch 62/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9278 - val_loss: 0.8239
Epoch 63/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9566 - val_loss: 0.9595
Epoch 64/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8924 - val_loss: 0.8878
Epoch 65/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8987 - val_loss: 0.8942
Epoch 66/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9237 - val_loss: 0.9372
Epoch 67/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9413 - val_loss: 1.0666
Epoch 68/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9107 - val_loss: 0.9015
Epoch 69/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8846 - val_loss: 0.9547
Epoch 70/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9184 - val_loss: 0.9142
Epoch 71/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8605 - val_loss: 0.8839
Epoch 72/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8586 - val_loss: 0.9218
Epoch 73/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9397 - val_loss: 0.9300
Epoch 74/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8864 - val_loss: 0.9876
Epoch 75/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9372 - val_loss: 0.9176
Epoch 76/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9446 - val_loss: 0.9044
Epoch 77/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8954 - val_loss: 0.9626
Epoch 78/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9104 - val_loss: 0.8949
Epoch 79/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9158 - val_loss: 0.8873
Epoch 80/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8787 - val_loss: 0.9567
Epoch 81/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9229 - val_loss: 0.9292
Epoch 82/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8989 - val_loss: 1.0450
Epoch 83/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9692 - val_loss: 0.8759
Epoch 84/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9375 - val_loss: 0.9016
Epoch 85/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9639 - val_loss: 0.9974
Epoch 86/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9121 - val_loss: 0.9566
Epoch 87/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8670 - val_loss: 0.8983
Epoch 88/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9719 - val_loss: 0.9215
Epoch 89/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9675 - val_loss: 1.0988
Epoch 90/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9503 - val_loss: 0.9056
Epoch 91/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9169 - val_loss: 0.8747
Epoch 92/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9393 - val_loss: 0.9005
Epoch 93/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9302 - val_loss: 1.0093
Epoch 94/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9362 - val_loss: 0.8611
Epoch 95/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9114 - val_loss: 0.9243
Epoch 96/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9142 - val_loss: 0.9943
Epoch 97/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8858 - val_loss: 0.8489
Epoch 98/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9174 - val_loss: 0.9711
Epoch 99/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9034 - val_loss: 0.9408
Epoch 100/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9044 - val_loss: 0.8991
In [12]:
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
Out[12]:
loss val_loss epoch
95 0.914181 0.994310 95
96 0.885819 0.848887 96
97 0.917376 0.971146 97
98 0.903369 0.940767 98
99 0.904427 0.899107 99
In [13]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
In [14]:
plot_loss(history)
In [15]:
y_pred_model_1 = model_1.predict(x_test)
9/9 [==============================] - 1s 2ms/step
In [16]:
a = plt.axes(aspect='equal')
plt.scatter(y_test, y_pred_model_1)
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [17]:
# Зададим функцию для визуализации факт/прогноз для результатов моделей
# Посмотрим на график результата работы модели
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, model_1.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'model_1')
9/9 [==============================] - 0s 0s/step

Зададим функцию для вывода метрик в виде таблицы¶

In [18]:
def error (model, x_train, x_test, y_train, y_test, name = 'Model name', trg = 'Целевой параметр'):
    mae_test = mean_absolute_error(y_test, model.predict(x_test))
    mse_test = mean_squared_error(y_test, model.predict(x_test))
    mae_train = mean_absolute_error(y_train, model.predict(x_train))
    mse_train = mean_squared_error(y_train, model.predict(x_train))
    R_Squared=  r2_score(y_test, model.predict(x_test))  
    
    df_error = pd.DataFrame({
        'model':[name],
        'Target param':trg,
        'MAE(test)':mae_test,
        'MAE(train)':mae_train,
        'MSE(test)':mse_test,
        'MSE(train)':mse_train,
        'R_Squared': R_Squared
})
    return df_error
In [19]:
df_1 = error(model_1, x_train, x_test, y_train, y_test,
       name = 'model_1', trg = 'Соотношение матрица-наполнитель')
df_1
9/9 [==============================] - 0s 3ms/step
9/9 [==============================] - 0s 0s/step
21/21 [==============================] - 0s 779us/step
21/21 [==============================] - 0s 779us/step
9/9 [==============================] - 0s 0s/step
Out[19]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 model_1 Соотношение матрица-наполнитель 0.718846 0.72798 0.769704 0.832339 -0.006862

Построим простую линейную модель_2 с теми же параметрами, но включим функцию callbacks¶

In [20]:
# вводим функцию CALLBACKS(остановка обучения когда  целевой показатель перестает улучшаться) 
callback = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, 
                                                     verbose=1, restore_best_weights=True)
def callbacks(pat = 10):
    callback = keras.callbacks.EarlyStopping(monitor='val_loss', patience=pat, 
                                                     verbose=1, restore_best_weights=True)
    return callback
In [21]:
model_2 = tf.keras.Sequential([
    normalizer,
    layers.Dense(units=1)
])
model_2.summary()
Model: "sequential_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_1 (Dense)             (None, 1)                 13        
                                                                 
=================================================================
Total params: 38
Trainable params: 13
Non-trainable params: 25
_________________________________________________________________
In [22]:
model_2.compile(
    optimizer=tf.optimizers.Adam(learning_rate=0.1),
    loss='mean_squared_error')
In [23]:
%%time
history = model_2.fit(
    x_train,
    y_train,
    epochs=100,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/100
17/17 [==============================] - 0s 7ms/step - loss: 6.6594 - val_loss: 2.9646
Epoch 2/100
17/17 [==============================] - 0s 3ms/step - loss: 1.5476 - val_loss: 1.0602
Epoch 3/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9035 - val_loss: 0.9089
Epoch 4/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8957 - val_loss: 0.8824
Epoch 5/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8550 - val_loss: 0.8828
Epoch 6/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9115 - val_loss: 0.9074
Epoch 7/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8638 - val_loss: 0.8776
Epoch 8/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8542 - val_loss: 0.8789
Epoch 9/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8998 - val_loss: 0.9021
Epoch 10/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8747 - val_loss: 0.8825
Epoch 11/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8663 - val_loss: 0.9532
Epoch 12/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9022 - val_loss: 0.8806
Epoch 13/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8910 - val_loss: 0.8811
Epoch 14/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8534 - val_loss: 0.8693
Epoch 15/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8789 - val_loss: 0.9172
Epoch 16/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8787 - val_loss: 0.9283
Epoch 17/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8780 - val_loss: 0.9229
Epoch 18/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9198 - val_loss: 0.9206
Epoch 19/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9067 - val_loss: 0.9180
Epoch 20/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8645 - val_loss: 0.8650
Epoch 21/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8826 - val_loss: 0.8828
Epoch 22/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8993 - val_loss: 0.9576
Epoch 23/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8832 - val_loss: 0.8633
Epoch 24/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8876 - val_loss: 0.9036
Epoch 25/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8993 - val_loss: 0.9981
Epoch 26/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8980 - val_loss: 0.8817
Epoch 27/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8943 - val_loss: 0.8796
Epoch 28/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8720 - val_loss: 0.9236
Epoch 29/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8876 - val_loss: 0.8626
Epoch 30/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9130 - val_loss: 1.0042
Epoch 31/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9319 - val_loss: 0.8701
Epoch 32/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9251 - val_loss: 0.9381
Epoch 33/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9148 - val_loss: 0.9610
Epoch 34/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8785 - val_loss: 0.8852
Epoch 35/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9012 - val_loss: 0.8870
Epoch 36/100
17/17 [==============================] - 0s 4ms/step - loss: 0.8822 - val_loss: 0.9102
Epoch 37/100
17/17 [==============================] - 0s 2ms/step - loss: 0.9018 - val_loss: 0.9395
Epoch 38/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9080 - val_loss: 0.8873
Epoch 39/100
 1/17 [>.............................] - ETA: 0s - loss: 1.2134Restoring model weights from the end of the best epoch: 29.
17/17 [==============================] - 0s 3ms/step - loss: 0.9532 - val_loss: 1.0203
Epoch 39: early stopping
Wall time: 2.34 s
In [24]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
In [25]:
plot_loss(history)
In [26]:
y_pred_model_2 = model_2.predict(x_test)
9/9 [==============================] - 0s 2ms/step
In [27]:
a = plt.axes(aspect='equal')
plt.scatter(y_test, y_pred_model_2)
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [28]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, model_2.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'model_2')
9/9 [==============================] - 0s 2ms/step
In [29]:
df_2 = error(model_2, x_train, x_test, y_train, y_test,
       name = 'model_2', trg = 'Соотношение матрица-наполнитель')
df_2
9/9 [==============================] - 0s 2ms/step
9/9 [==============================] - 0s 2ms/step
21/21 [==============================] - 0s 325us/step
21/21 [==============================] - 0s 779us/step
9/9 [==============================] - 0s 0s/step
Out[29]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 model_2 Соотношение матрица-наполнитель 0.711121 0.741844 0.756206 0.854804 0.010795

Построим простую линейную модель с теми же параметрами, но изменим оптимизатор на SGD¶

In [30]:
model_3 = tf.keras.Sequential([
    normalizer,
    layers.Dense(units=1)
])
model_3.summary()
Model: "sequential_2"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_2 (Dense)             (None, 1)                 13        
                                                                 
=================================================================
Total params: 38
Trainable params: 13
Non-trainable params: 25
_________________________________________________________________
In [31]:
model_3.compile(
    optimizer=tf.optimizers.SGD(learning_rate=0.1),
    loss='mean_squared_error')
In [32]:
%%time
history = model_3.fit(
    x_train,
    y_train,
    epochs=100,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/100
17/17 [==============================] - 0s 8ms/step - loss: 2.7424 - val_loss: 0.9859
Epoch 2/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8596 - val_loss: 1.0545
Epoch 3/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8788 - val_loss: 0.9254
Epoch 4/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8733 - val_loss: 0.9551
Epoch 5/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8604 - val_loss: 1.0071
Epoch 6/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8612 - val_loss: 0.9417
Epoch 7/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8536 - val_loss: 1.0414
Epoch 8/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8612 - val_loss: 1.0788
Epoch 9/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8805 - val_loss: 0.8980
Epoch 10/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8413 - val_loss: 1.1327
Epoch 11/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8847 - val_loss: 1.0290
Epoch 12/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8692 - val_loss: 0.8725
Epoch 13/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8633 - val_loss: 0.8990
Epoch 14/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8509 - val_loss: 0.9489
Epoch 15/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8563 - val_loss: 1.0181
Epoch 16/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8674 - val_loss: 1.0781
Epoch 17/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8847 - val_loss: 0.9925
Epoch 18/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8648 - val_loss: 1.0226
Epoch 19/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8843 - val_loss: 0.8908
Epoch 20/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8667 - val_loss: 0.8927
Epoch 21/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8473 - val_loss: 0.9375
Epoch 22/100
 1/17 [>.............................] - ETA: 0s - loss: 1.0977Restoring model weights from the end of the best epoch: 12.
17/17 [==============================] - 0s 3ms/step - loss: 0.8656 - val_loss: 1.0945
Epoch 22: early stopping
Wall time: 1.43 s
In [33]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
In [34]:
plot_loss(history)
In [35]:
y_pred_model_3 = model_3.predict(x_test)
9/9 [==============================] - 0s 2ms/step
In [36]:
a = plt.axes(aspect='equal')
plt.scatter(y_test, y_pred_model_3)
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [37]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, model_3.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'model_3')
9/9 [==============================] - 0s 1ms/step
In [38]:
df_3 = error(model_3, x_train, x_test, y_train, y_test,
       name = 'model_3', trg = 'Соотношение матрица-наполнитель')
df_3
9/9 [==============================] - 0s 0s/step
9/9 [==============================] - 0s 0s/step
21/21 [==============================] - 0s 781us/step
21/21 [==============================] - 0s 782us/step
9/9 [==============================] - 0s 2ms/step
Out[38]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 model_3 Соотношение матрица-наполнитель 0.746268 0.75142 0.828954 0.881878 -0.084367

Построим простую линейную модель с теми же параметрами, но изменим оптимизатор на RMSprop¶

In [39]:
model_4 = tf.keras.Sequential([
    normalizer,
    layers.Dense(units=1)
])
model_4.summary()
Model: "sequential_3"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_3 (Dense)             (None, 1)                 13        
                                                                 
=================================================================
Total params: 38
Trainable params: 13
Non-trainable params: 25
_________________________________________________________________
In [40]:
model_4.compile(
    optimizer=tf.optimizers.RMSprop(learning_rate=0.01),
    loss='mean_squared_error')
In [41]:
%%time
history = model_4.fit(
    x_train,
    y_train,
    epochs=100,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/100
17/17 [==============================] - 1s 19ms/step - loss: 9.9180 - val_loss: 8.6530
Epoch 2/100
17/17 [==============================] - 0s 3ms/step - loss: 8.2423 - val_loss: 7.5846
Epoch 3/100
17/17 [==============================] - 0s 3ms/step - loss: 7.1031 - val_loss: 6.6473
Epoch 4/100
17/17 [==============================] - 0s 3ms/step - loss: 6.1435 - val_loss: 5.8350
Epoch 5/100
17/17 [==============================] - 0s 2ms/step - loss: 5.3009 - val_loss: 5.1581
Epoch 6/100
17/17 [==============================] - 0s 3ms/step - loss: 4.5842 - val_loss: 4.5356
Epoch 7/100
17/17 [==============================] - 0s 3ms/step - loss: 3.9440 - val_loss: 3.9675
Epoch 8/100
17/17 [==============================] - 0s 3ms/step - loss: 3.3874 - val_loss: 3.4732
Epoch 9/100
17/17 [==============================] - 0s 3ms/step - loss: 2.9014 - val_loss: 3.0082
Epoch 10/100
17/17 [==============================] - 0s 3ms/step - loss: 2.4654 - val_loss: 2.5389
Epoch 11/100
17/17 [==============================] - 0s 3ms/step - loss: 2.0786 - val_loss: 2.1993
Epoch 12/100
17/17 [==============================] - 0s 3ms/step - loss: 1.7558 - val_loss: 1.8652
Epoch 13/100
17/17 [==============================] - 0s 3ms/step - loss: 1.4857 - val_loss: 1.5858
Epoch 14/100
17/17 [==============================] - 0s 3ms/step - loss: 1.2619 - val_loss: 1.3957
Epoch 15/100
17/17 [==============================] - 0s 3ms/step - loss: 1.1006 - val_loss: 1.1728
Epoch 16/100
17/17 [==============================] - 0s 3ms/step - loss: 0.9707 - val_loss: 1.0287
Epoch 17/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8922 - val_loss: 0.9700
Epoch 18/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8528 - val_loss: 0.9420
Epoch 19/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8287 - val_loss: 0.9221
Epoch 20/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8178 - val_loss: 0.8905
Epoch 21/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8174 - val_loss: 0.8806
Epoch 22/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8139 - val_loss: 0.8734
Epoch 23/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8124 - val_loss: 0.8614
Epoch 24/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8143 - val_loss: 0.9037
Epoch 25/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8115 - val_loss: 0.8968
Epoch 26/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8145 - val_loss: 0.8824
Epoch 27/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8167 - val_loss: 0.8902
Epoch 28/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8135 - val_loss: 0.8871
Epoch 29/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8191 - val_loss: 0.8748
Epoch 30/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8148 - val_loss: 0.8849
Epoch 31/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8129 - val_loss: 0.8707
Epoch 32/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8156 - val_loss: 0.8514
Epoch 33/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8146 - val_loss: 0.8850
Epoch 34/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8135 - val_loss: 0.8832
Epoch 35/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8139 - val_loss: 0.8793
Epoch 36/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8116 - val_loss: 0.8910
Epoch 37/100
17/17 [==============================] - 0s 4ms/step - loss: 0.8143 - val_loss: 0.8571
Epoch 38/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8151 - val_loss: 0.8549
Epoch 39/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8144 - val_loss: 0.8571
Epoch 40/100
17/17 [==============================] - 0s 2ms/step - loss: 0.8143 - val_loss: 0.8572
Epoch 41/100
17/17 [==============================] - 0s 3ms/step - loss: 0.8148 - val_loss: 0.8706
Epoch 42/100
 1/17 [>.............................] - ETA: 0s - loss: 0.8429Restoring model weights from the end of the best epoch: 32.
17/17 [==============================] - 0s 2ms/step - loss: 0.8164 - val_loss: 0.8834
Epoch 42: early stopping
Wall time: 2.78 s
In [42]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
In [43]:
plot_loss(history)
In [44]:
y_pred_model_4 = model_4.predict(x_test)
9/9 [==============================] - 0s 2ms/step
In [45]:
a = plt.axes(aspect='equal')
plt.scatter(y_test, y_pred_model_4)
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [46]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, model_4.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'model_4')
9/9 [==============================] - 0s 0s/step
In [47]:
df_4 = error(model_4, x_train, x_test, y_train, y_test,
       name = 'model_4', trg = 'Соотношение матрица-наполнитель')
df_4
9/9 [==============================] - 0s 2ms/step
9/9 [==============================] - 0s 2ms/step
21/21 [==============================] - 0s 1ms/step
21/21 [==============================] - 0s 780us/step
9/9 [==============================] - 0s 2ms/step
Out[47]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 model_4 Соотношение матрица-наполнитель 0.715961 0.71926 0.775988 0.809827 -0.015082
In [48]:
df_result = pd.concat([df_1, df_2, df_3, df_4], axis=0).reset_index(drop = True)
df_result
Out[48]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 model_1 Соотношение матрица-наполнитель 0.718846 0.727980 0.769704 0.832339 -0.006862
1 model_2 Соотношение матрица-наполнитель 0.711121 0.741844 0.756206 0.854804 0.010795
2 model_3 Соотношение матрица-наполнитель 0.746268 0.751420 0.828954 0.881878 -0.084367
3 model_4 Соотношение матрица-наполнитель 0.715961 0.719260 0.775988 0.809827 -0.015082

Построение многослойного персетрона¶

In [49]:
def build_and_compile_model(normalizer):
    model = keras.Sequential([
      normalizer,
      layers.Dense(64, activation='relu'),
      layers.Dense(64, activation='relu'),
      layers.Dense(1)
    ])

    model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                loss='mean_squared_error')
    return model
In [50]:
mlp_1 = build_and_compile_model(normalizer)
mlp_1.summary()
Model: "sequential_4"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_4 (Dense)             (None, 64)                832       
                                                                 
 dense_5 (Dense)             (None, 64)                4160      
                                                                 
 dense_6 (Dense)             (None, 1)                 65        
                                                                 
=================================================================
Total params: 5,082
Trainable params: 5,057
Non-trainable params: 25
_________________________________________________________________
In [51]:
%%time
history = mlp_1.fit(
    x_train,
    y_train,
    epochs=1023,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/1023
17/17 [==============================] - 1s 10ms/step - loss: 3.6243 - val_loss: 1.4598
Epoch 2/1023
17/17 [==============================] - 0s 3ms/step - loss: 1.3220 - val_loss: 1.1473
Epoch 3/1023
17/17 [==============================] - 0s 3ms/step - loss: 1.1639 - val_loss: 1.1013
Epoch 4/1023
17/17 [==============================] - 0s 3ms/step - loss: 1.0189 - val_loss: 1.1382
Epoch 5/1023
17/17 [==============================] - 0s 2ms/step - loss: 0.9799 - val_loss: 1.1339
Epoch 6/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.9368 - val_loss: 1.1275
Epoch 7/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.9081 - val_loss: 1.1164
Epoch 8/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.8867 - val_loss: 1.1076
Epoch 9/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.8624 - val_loss: 1.1021
Epoch 10/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.8469 - val_loss: 1.1016
Epoch 11/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.8251 - val_loss: 1.0783
Epoch 12/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.8044 - val_loss: 1.0737
Epoch 13/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.8013 - val_loss: 1.1072
Epoch 14/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.7694 - val_loss: 1.0869
Epoch 15/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.7550 - val_loss: 1.0853
Epoch 16/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.7354 - val_loss: 1.0684
Epoch 17/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.7223 - val_loss: 1.0842
Epoch 18/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.7149 - val_loss: 1.0810
Epoch 19/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.6899 - val_loss: 1.0507
Epoch 20/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.6760 - val_loss: 1.0310
Epoch 21/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.6676 - val_loss: 1.0742
Epoch 22/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.6535 - val_loss: 1.0381
Epoch 23/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.6419 - val_loss: 1.0297
Epoch 24/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.6220 - val_loss: 1.0300
Epoch 25/1023
17/17 [==============================] - 0s 2ms/step - loss: 0.6108 - val_loss: 1.0521
Epoch 26/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.6041 - val_loss: 1.0588
Epoch 27/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.5901 - val_loss: 1.0521
Epoch 28/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.5771 - val_loss: 1.0557
Epoch 29/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.5575 - val_loss: 1.0405
Epoch 30/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.5417 - val_loss: 1.0520
Epoch 31/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.5397 - val_loss: 1.0310
Epoch 32/1023
17/17 [==============================] - 0s 3ms/step - loss: 0.5266 - val_loss: 1.0390
Epoch 33/1023
 1/17 [>.............................] - ETA: 0s - loss: 0.6047Restoring model weights from the end of the best epoch: 23.
17/17 [==============================] - 0s 3ms/step - loss: 0.5198 - val_loss: 1.0413
Epoch 33: early stopping
Wall time: 2.88 s
In [52]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
In [53]:
plot_loss(history)
In [54]:
y_pred_mlp_1 = mlp_1.predict(x_test)
9/9 [==============================] - 0s 0s/step
In [55]:
a = plt.axes(aspect='equal')
plt.scatter(y_test, y_pred_mlp_1)
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [56]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, mlp_1.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'mlp_1')
9/9 [==============================] - 0s 0s/step
In [57]:
df_mlp_1 = error(mlp_1, x_train, x_test, y_train, y_test,
    name = 'mlp_1', trg = 'Соотношение матрица-наполнитель')
df_mlp_1
9/9 [==============================] - 0s 2ms/step
9/9 [==============================] - 0s 0s/step
21/21 [==============================] - 0s 1ms/step
21/21 [==============================] - 0s 780us/step
9/9 [==============================] - 0s 2ms/step
Out[57]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_1 Соотношение матрица-наполнитель 0.84296 0.669361 1.039481 0.699295 -0.359761

Увеличим число слоев и изменим число нейронов¶

In [58]:
def build_and_compile_model(normalizer):
    model_2 = keras.Sequential([
      normalizer,
      layers.Dense(32, activation='relu'),
      layers.Dense(64, activation='relu'),
      layers.Dense(128, activation='relu'),  
      layers.Dense(1)
    ])

    model_2.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                loss='mean_squared_error')
    return model_2
In [59]:
mlp_2 = build_and_compile_model(normalizer)
mlp_2.summary()
Model: "sequential_5"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_7 (Dense)             (None, 32)                416       
                                                                 
 dense_8 (Dense)             (None, 64)                2112      
                                                                 
 dense_9 (Dense)             (None, 128)               8320      
                                                                 
 dense_10 (Dense)            (None, 1)                 129       
                                                                 
=================================================================
Total params: 11,002
Trainable params: 10,977
Non-trainable params: 25
_________________________________________________________________
In [60]:
%%time
history = mlp_2.fit(
    x_train,
    y_train,
    epochs=1000,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/1000
17/17 [==============================] - 1s 9ms/step - loss: 5.5147 - val_loss: 1.6071
Epoch 2/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.6149 - val_loss: 1.2280
Epoch 3/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.2646 - val_loss: 1.2105
Epoch 4/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1512 - val_loss: 1.1629
Epoch 5/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.0713 - val_loss: 1.1450
Epoch 6/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0210 - val_loss: 1.1339
Epoch 7/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9797 - val_loss: 1.1184
Epoch 8/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9752 - val_loss: 1.1212
Epoch 9/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9221 - val_loss: 1.1315
Epoch 10/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9020 - val_loss: 1.1117
Epoch 11/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8602 - val_loss: 1.1070
Epoch 12/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8411 - val_loss: 1.1036
Epoch 13/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8160 - val_loss: 1.0795
Epoch 14/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7929 - val_loss: 1.0648
Epoch 15/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7787 - val_loss: 1.0984
Epoch 16/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7508 - val_loss: 1.0994
Epoch 17/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7431 - val_loss: 1.0785
Epoch 18/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7138 - val_loss: 1.0711
Epoch 19/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6964 - val_loss: 1.1020
Epoch 20/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6733 - val_loss: 1.1177
Epoch 21/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6807 - val_loss: 1.0597
Epoch 22/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6355 - val_loss: 1.0396
Epoch 23/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6316 - val_loss: 1.0658
Epoch 24/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6034 - val_loss: 1.0478
Epoch 25/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5880 - val_loss: 1.0587
Epoch 26/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5769 - val_loss: 1.0443
Epoch 27/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.5673 - val_loss: 1.0462
Epoch 28/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5323 - val_loss: 1.0634
Epoch 29/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5096 - val_loss: 1.0591
Epoch 30/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5089 - val_loss: 1.0785
Epoch 31/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.4899 - val_loss: 1.0797
Epoch 32/1000
 1/17 [>.............................] - ETA: 0s - loss: 0.4320Restoring model weights from the end of the best epoch: 22.
17/17 [==============================] - 0s 4ms/step - loss: 0.4845 - val_loss: 1.0935
Epoch 32: early stopping
Wall time: 2.45 s
In [61]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
In [62]:
plot_loss(history)
In [63]:
y_pred_mlp_2 = mlp_2.predict(x_test)
9/9 [==============================] - 0s 0s/step
In [64]:
a = plt.axes(aspect='equal')
plt.scatter(y_test, y_pred_mlp_2)
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [65]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, mlp_2.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'mlp_2')
9/9 [==============================] - 0s 2ms/step
In [66]:
df_mlp_2 = error(mlp_2, x_train, x_test, y_train, y_test,
    name = 'mlp_2', trg = 'Соотношение матрица-наполнитель')
df_mlp_2
9/9 [==============================] - 0s 0s/step
9/9 [==============================] - 0s 813us/step
21/21 [==============================] - 0s 2ms/step
21/21 [==============================] - 0s 779us/step
9/9 [==============================] - 0s 0s/step
Out[66]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_2 Соотношение матрица-наполнитель 0.861006 0.671457 1.105257 0.699097 -0.445803

число слоев оставим , изменим число нейронов¶

In [67]:
def build_and_compile_model(normalizer):
    model_3 = keras.Sequential([
      normalizer,
      layers.Dense(64, activation='relu'),
      layers.Dense(128, activation='relu'),
      layers.Dense(256, activation='relu'),  
      layers.Dense(1)
    ])

    model_3.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                loss='mean_squared_error')
    return model_3
In [68]:
mlp_3 = build_and_compile_model(normalizer)
mlp_3.summary()
Model: "sequential_6"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_11 (Dense)            (None, 64)                832       
                                                                 
 dense_12 (Dense)            (None, 128)               8320      
                                                                 
 dense_13 (Dense)            (None, 256)               33024     
                                                                 
 dense_14 (Dense)            (None, 1)                 257       
                                                                 
=================================================================
Total params: 42,458
Trainable params: 42,433
Non-trainable params: 25
_________________________________________________________________
In [69]:
%%time
history = mlp_3.fit(
    x_train,
    y_train,
    epochs=1000,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/1000
17/17 [==============================] - 1s 10ms/step - loss: 4.3518 - val_loss: 1.6544
Epoch 2/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.3188 - val_loss: 1.3009
Epoch 3/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.0151 - val_loss: 1.1684
Epoch 4/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9381 - val_loss: 1.1451
Epoch 5/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8926 - val_loss: 1.1292
Epoch 6/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8554 - val_loss: 1.1356
Epoch 7/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8224 - val_loss: 1.1085
Epoch 8/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7852 - val_loss: 1.1865
Epoch 9/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7647 - val_loss: 1.1318
Epoch 10/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7253 - val_loss: 1.0708
Epoch 11/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7012 - val_loss: 1.1466
Epoch 12/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6783 - val_loss: 1.0759
Epoch 13/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6354 - val_loss: 1.0741
Epoch 14/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5936 - val_loss: 1.0732
Epoch 15/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.5822 - val_loss: 1.0957
Epoch 16/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5106 - val_loss: 1.0842
Epoch 17/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5341 - val_loss: 1.1047
Epoch 18/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.4762 - val_loss: 1.0864
Epoch 19/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.4460 - val_loss: 1.0334
Epoch 20/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.3940 - val_loss: 1.0881
Epoch 21/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.3696 - val_loss: 1.0527
Epoch 22/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.3379 - val_loss: 1.0594
Epoch 23/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.3212 - val_loss: 1.0409
Epoch 24/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.2997 - val_loss: 1.1100
Epoch 25/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.2755 - val_loss: 1.0667
Epoch 26/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.2582 - val_loss: 1.1088
Epoch 27/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.2266 - val_loss: 1.1217
Epoch 28/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.2247 - val_loss: 1.0882
Epoch 29/1000
 1/17 [>.............................] - ETA: 0s - loss: 0.2313Restoring model weights from the end of the best epoch: 19.
17/17 [==============================] - 0s 4ms/step - loss: 0.2105 - val_loss: 1.1704
Epoch 29: early stopping
Wall time: 2.69 s
In [70]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
In [71]:
plot_loss(history)
In [72]:
y_pred_mlp_3 = mlp_3.predict(x_test)
9/9 [==============================] - 0s 0s/step
In [73]:
a = plt.axes(aspect='equal')
plt.scatter(y_test, y_pred_mlp_3)
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [74]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, mlp_3.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'mlp_3')
9/9 [==============================] - 0s 0s/step
In [75]:
df_mlp_3 = error(mlp_3, x_train, x_test, y_train, y_test,
    name = 'mlp_3', trg = 'Соотношение матрица-наполнитель')
df_mlp_3
9/9 [==============================] - 0s 2ms/step
9/9 [==============================] - 0s 2ms/step
21/21 [==============================] - 0s 780us/step
21/21 [==============================] - 0s 1ms/step
9/9 [==============================] - 0s 2ms/step
Out[75]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_3 Соотношение матрица-наполнитель 0.868866 0.560671 1.12006 0.513567 -0.465168

Построим многослойную модель с теми же параметрами, но изменим оптимизатор на SGD¶

In [76]:
def build_and_compile_model(normalizer):
    model_4 = keras.Sequential([
      normalizer,
      layers.Dense(64, activation='relu'),
      layers.Dense(64, activation='relu'),
      layers.Dense(1)
    ])

    model_4.compile(optimizer=tf.keras.optimizers.SGD(0.001),
                loss='mean_squared_error')
    return model_4
In [77]:
mlp_4 = build_and_compile_model(normalizer)
mlp_4.summary()
Model: "sequential_7"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_15 (Dense)            (None, 64)                832       
                                                                 
 dense_16 (Dense)            (None, 64)                4160      
                                                                 
 dense_17 (Dense)            (None, 1)                 65        
                                                                 
=================================================================
Total params: 5,082
Trainable params: 5,057
Non-trainable params: 25
_________________________________________________________________
In [78]:
%%time
history = mlp_4.fit(
    x_train,
    y_train,
    epochs=1000,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/1000
17/17 [==============================] - 0s 8ms/step - loss: 8.7493 - val_loss: 6.9267
Epoch 2/1000
17/17 [==============================] - 0s 2ms/step - loss: 5.9389 - val_loss: 4.6762
Epoch 3/1000
17/17 [==============================] - 0s 2ms/step - loss: 4.0648 - val_loss: 3.1731
Epoch 4/1000
17/17 [==============================] - 0s 3ms/step - loss: 2.8434 - val_loss: 2.2295
Epoch 5/1000
17/17 [==============================] - 0s 2ms/step - loss: 2.0979 - val_loss: 1.6985
Epoch 6/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.6854 - val_loss: 1.4053
Epoch 7/1000
17/17 [==============================] - 0s 2ms/step - loss: 1.4537 - val_loss: 1.2685
Epoch 8/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.3366 - val_loss: 1.1985
Epoch 9/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.2689 - val_loss: 1.1575
Epoch 10/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.2214 - val_loss: 1.1326
Epoch 11/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1849 - val_loss: 1.1135
Epoch 12/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1556 - val_loss: 1.1045
Epoch 13/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1314 - val_loss: 1.0934
Epoch 14/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1100 - val_loss: 1.0837
Epoch 15/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0896 - val_loss: 1.0777
Epoch 16/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0728 - val_loss: 1.0719
Epoch 17/1000
17/17 [==============================] - 0s 2ms/step - loss: 1.0575 - val_loss: 1.0666
Epoch 18/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0421 - val_loss: 1.0624
Epoch 19/1000
17/17 [==============================] - 0s 2ms/step - loss: 1.0294 - val_loss: 1.0581
Epoch 20/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0181 - val_loss: 1.0560
Epoch 21/1000
17/17 [==============================] - 0s 2ms/step - loss: 1.0061 - val_loss: 1.0534
Epoch 22/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9960 - val_loss: 1.0522
Epoch 23/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9868 - val_loss: 1.0513
Epoch 24/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9787 - val_loss: 1.0501
Epoch 25/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9708 - val_loss: 1.0485
Epoch 26/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9646 - val_loss: 1.0470
Epoch 27/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9582 - val_loss: 1.0454
Epoch 28/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9514 - val_loss: 1.0453
Epoch 29/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9449 - val_loss: 1.0443
Epoch 30/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9389 - val_loss: 1.0430
Epoch 31/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.9350 - val_loss: 1.0432
Epoch 32/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9301 - val_loss: 1.0420
Epoch 33/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.9252 - val_loss: 1.0422
Epoch 34/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9214 - val_loss: 1.0409
Epoch 35/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.9172 - val_loss: 1.0405
Epoch 36/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9128 - val_loss: 1.0401
Epoch 37/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.9087 - val_loss: 1.0413
Epoch 38/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.9050 - val_loss: 1.0403
Epoch 39/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.9021 - val_loss: 1.0405
Epoch 40/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8981 - val_loss: 1.0394
Epoch 41/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8945 - val_loss: 1.0411
Epoch 42/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8913 - val_loss: 1.0398
Epoch 43/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8889 - val_loss: 1.0390
Epoch 44/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8853 - val_loss: 1.0389
Epoch 45/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8826 - val_loss: 1.0409
Epoch 46/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8806 - val_loss: 1.0386
Epoch 47/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8772 - val_loss: 1.0370
Epoch 48/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8749 - val_loss: 1.0363
Epoch 49/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8723 - val_loss: 1.0354
Epoch 50/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8703 - val_loss: 1.0356
Epoch 51/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8673 - val_loss: 1.0350
Epoch 52/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8653 - val_loss: 1.0378
Epoch 53/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8636 - val_loss: 1.0359
Epoch 54/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8602 - val_loss: 1.0354
Epoch 55/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8588 - val_loss: 1.0329
Epoch 56/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8564 - val_loss: 1.0324
Epoch 57/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8554 - val_loss: 1.0324
Epoch 58/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8522 - val_loss: 1.0323
Epoch 59/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8498 - val_loss: 1.0329
Epoch 60/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8475 - val_loss: 1.0327
Epoch 61/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8459 - val_loss: 1.0305
Epoch 62/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8441 - val_loss: 1.0306
Epoch 63/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8417 - val_loss: 1.0300
Epoch 64/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8399 - val_loss: 1.0297
Epoch 65/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8385 - val_loss: 1.0298
Epoch 66/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8364 - val_loss: 1.0266
Epoch 67/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8345 - val_loss: 1.0259
Epoch 68/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8332 - val_loss: 1.0268
Epoch 69/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8310 - val_loss: 1.0268
Epoch 70/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8289 - val_loss: 1.0260
Epoch 71/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8273 - val_loss: 1.0253
Epoch 72/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8254 - val_loss: 1.0251
Epoch 73/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8239 - val_loss: 1.0279
Epoch 74/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8224 - val_loss: 1.0256
Epoch 75/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8203 - val_loss: 1.0244
Epoch 76/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8190 - val_loss: 1.0256
Epoch 77/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8173 - val_loss: 1.0237
Epoch 78/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8163 - val_loss: 1.0244
Epoch 79/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8136 - val_loss: 1.0243
Epoch 80/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8126 - val_loss: 1.0253
Epoch 81/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8116 - val_loss: 1.0226
Epoch 82/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8096 - val_loss: 1.0239
Epoch 83/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8081 - val_loss: 1.0205
Epoch 84/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8062 - val_loss: 1.0208
Epoch 85/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8051 - val_loss: 1.0192
Epoch 86/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8038 - val_loss: 1.0178
Epoch 87/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8017 - val_loss: 1.0172
Epoch 88/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8005 - val_loss: 1.0183
Epoch 89/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7997 - val_loss: 1.0145
Epoch 90/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7980 - val_loss: 1.0139
Epoch 91/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7963 - val_loss: 1.0155
Epoch 92/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7957 - val_loss: 1.0140
Epoch 93/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7940 - val_loss: 1.0128
Epoch 94/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7926 - val_loss: 1.0097
Epoch 95/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7913 - val_loss: 1.0100
Epoch 96/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7900 - val_loss: 1.0101
Epoch 97/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7881 - val_loss: 1.0102
Epoch 98/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7868 - val_loss: 1.0085
Epoch 99/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7854 - val_loss: 1.0093
Epoch 100/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7845 - val_loss: 1.0080
Epoch 101/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7830 - val_loss: 1.0071
Epoch 102/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7817 - val_loss: 1.0062
Epoch 103/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7805 - val_loss: 1.0061
Epoch 104/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7790 - val_loss: 1.0059
Epoch 105/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7782 - val_loss: 1.0058
Epoch 106/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7765 - val_loss: 1.0079
Epoch 107/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7758 - val_loss: 1.0055
Epoch 108/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7747 - val_loss: 1.0037
Epoch 109/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7732 - val_loss: 1.0027
Epoch 110/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7719 - val_loss: 1.0023
Epoch 111/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7710 - val_loss: 1.0045
Epoch 112/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7697 - val_loss: 1.0042
Epoch 113/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7688 - val_loss: 1.0060
Epoch 114/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7672 - val_loss: 1.0048
Epoch 115/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7667 - val_loss: 1.0041
Epoch 116/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7651 - val_loss: 1.0037
Epoch 117/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7636 - val_loss: 1.0049
Epoch 118/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7631 - val_loss: 1.0027
Epoch 119/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7618 - val_loss: 1.0008
Epoch 120/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7603 - val_loss: 1.0010
Epoch 121/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7596 - val_loss: 1.0011
Epoch 122/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7581 - val_loss: 1.0015
Epoch 123/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7569 - val_loss: 0.9993
Epoch 124/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7562 - val_loss: 0.9990
Epoch 125/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7551 - val_loss: 0.9996
Epoch 126/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7535 - val_loss: 1.0002
Epoch 127/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7527 - val_loss: 1.0014
Epoch 128/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7521 - val_loss: 1.0014
Epoch 129/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7511 - val_loss: 0.9989
Epoch 130/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7499 - val_loss: 0.9989
Epoch 131/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7487 - val_loss: 0.9994
Epoch 132/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7476 - val_loss: 1.0017
Epoch 133/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7468 - val_loss: 1.0008
Epoch 134/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7454 - val_loss: 0.9998
Epoch 135/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7442 - val_loss: 1.0014
Epoch 136/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7436 - val_loss: 1.0008
Epoch 137/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7421 - val_loss: 1.0015
Epoch 138/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7415 - val_loss: 1.0017
Epoch 139/1000
 1/17 [>.............................] - ETA: 0s - loss: 0.8821Restoring model weights from the end of the best epoch: 129.
17/17 [==============================] - 0s 3ms/step - loss: 0.7401 - val_loss: 1.0038
Epoch 139: early stopping
Wall time: 7.37 s
In [79]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
plot_loss(history)   
In [80]:
y_pred_mlp_4 = mlp_4.predict(x_test)
9/9 [==============================] - 0s 1ms/step
In [81]:
# Зададим функцию для визуализации факт/прогноз для результатов моделей
# Посмотрим на график результата работы модели
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, mlp_4.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'mlp_4')
9/9 [==============================] - 0s 0s/step
In [82]:
a = plt.axes(aspect='equal')
plt.scatter(y_test,y_pred_mlp_4)
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [83]:
df_mlp_4 = error(mlp_4, x_train, x_test, y_train, y_test,
    name = 'mlp_4', trg = 'Соотношение матрица-наполнитель')
df_mlp_4
9/9 [==============================] - 0s 2ms/step
9/9 [==============================] - 0s 0s/step
21/21 [==============================] - 0s 779us/step
21/21 [==============================] - 0s 2ms/step
9/9 [==============================] - 0s 2ms/step
Out[83]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_4 Соотношение матрица-наполнитель 0.821671 0.714738 0.987026 0.798285 -0.291144
In [84]:
def build_and_compile_model(normalizer):
    model_5 = keras.Sequential([
      normalizer,
      layers.Dense(32, activation='relu'),
      layers.Dense(64, activation='relu'),
      layers.Dense(128, activation='relu'),  
      layers.Dense(1)
    ])

    model_5.compile(optimizer=tf.keras.optimizers.SGD(0.001),
                loss='mean_squared_error')
    return model_5
In [85]:
mlp_5 = build_and_compile_model(normalizer)
mlp_5.summary()
Model: "sequential_8"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_18 (Dense)            (None, 32)                416       
                                                                 
 dense_19 (Dense)            (None, 64)                2112      
                                                                 
 dense_20 (Dense)            (None, 128)               8320      
                                                                 
 dense_21 (Dense)            (None, 1)                 129       
                                                                 
=================================================================
Total params: 11,002
Trainable params: 10,977
Non-trainable params: 25
_________________________________________________________________
In [86]:
%%time
history = mlp_5.fit(
    x_train,
    y_train,
    epochs=1000,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/1000
17/17 [==============================] - 1s 10ms/step - loss: 7.6742 - val_loss: 6.2819
Epoch 2/1000
17/17 [==============================] - 0s 4ms/step - loss: 5.2226 - val_loss: 4.2336
Epoch 3/1000
17/17 [==============================] - 0s 4ms/step - loss: 3.4512 - val_loss: 2.7746
Epoch 4/1000
17/17 [==============================] - 0s 3ms/step - loss: 2.2555 - val_loss: 1.9634
Epoch 5/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.6296 - val_loss: 1.6093
Epoch 6/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.3667 - val_loss: 1.4562
Epoch 7/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.2526 - val_loss: 1.3925
Epoch 8/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.2029 - val_loss: 1.3635
Epoch 9/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.1773 - val_loss: 1.3407
Epoch 10/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1561 - val_loss: 1.3232
Epoch 11/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1404 - val_loss: 1.3081
Epoch 12/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1264 - val_loss: 1.2954
Epoch 13/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1146 - val_loss: 1.2830
Epoch 14/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1035 - val_loss: 1.2699
Epoch 15/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0914 - val_loss: 1.2590
Epoch 16/1000
17/17 [==============================] - 0s 2ms/step - loss: 1.0809 - val_loss: 1.2469
Epoch 17/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0719 - val_loss: 1.2384
Epoch 18/1000
17/17 [==============================] - 0s 2ms/step - loss: 1.0630 - val_loss: 1.2300
Epoch 19/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0542 - val_loss: 1.2237
Epoch 20/1000
17/17 [==============================] - 0s 2ms/step - loss: 1.0473 - val_loss: 1.2145
Epoch 21/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0383 - val_loss: 1.2077
Epoch 22/1000
17/17 [==============================] - 0s 2ms/step - loss: 1.0314 - val_loss: 1.2024
Epoch 23/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0246 - val_loss: 1.1964
Epoch 24/1000
17/17 [==============================] - 0s 2ms/step - loss: 1.0191 - val_loss: 1.1899
Epoch 25/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.0115 - val_loss: 1.1844
Epoch 26/1000
17/17 [==============================] - 0s 2ms/step - loss: 1.0053 - val_loss: 1.1793
Epoch 27/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9993 - val_loss: 1.1748
Epoch 28/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9934 - val_loss: 1.1704
Epoch 29/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9879 - val_loss: 1.1643
Epoch 30/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9822 - val_loss: 1.1590
Epoch 31/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9769 - val_loss: 1.1531
Epoch 32/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9719 - val_loss: 1.1494
Epoch 33/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9671 - val_loss: 1.1459
Epoch 34/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9623 - val_loss: 1.1433
Epoch 35/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9585 - val_loss: 1.1383
Epoch 36/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9528 - val_loss: 1.1342
Epoch 37/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9475 - val_loss: 1.1300
Epoch 38/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9434 - val_loss: 1.1264
Epoch 39/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9395 - val_loss: 1.1220
Epoch 40/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9355 - val_loss: 1.1196
Epoch 41/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9315 - val_loss: 1.1159
Epoch 42/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9281 - val_loss: 1.1141
Epoch 43/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9257 - val_loss: 1.1105
Epoch 44/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9212 - val_loss: 1.1073
Epoch 45/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9180 - val_loss: 1.1044
Epoch 46/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.9143 - val_loss: 1.1030
Epoch 47/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9116 - val_loss: 1.1003
Epoch 48/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9075 - val_loss: 1.0971
Epoch 49/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9035 - val_loss: 1.0954
Epoch 50/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9012 - val_loss: 1.0950
Epoch 51/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8995 - val_loss: 1.0898
Epoch 52/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8938 - val_loss: 1.0861
Epoch 53/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8906 - val_loss: 1.0836
Epoch 54/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8880 - val_loss: 1.0797
Epoch 55/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8852 - val_loss: 1.0786
Epoch 56/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8844 - val_loss: 1.0748
Epoch 57/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8796 - val_loss: 1.0728
Epoch 58/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8764 - val_loss: 1.0717
Epoch 59/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8742 - val_loss: 1.0692
Epoch 60/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8716 - val_loss: 1.0680
Epoch 61/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8696 - val_loss: 1.0648
Epoch 62/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8659 - val_loss: 1.0617
Epoch 63/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8638 - val_loss: 1.0624
Epoch 64/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8620 - val_loss: 1.0590
Epoch 65/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8588 - val_loss: 1.0586
Epoch 66/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8568 - val_loss: 1.0572
Epoch 67/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8543 - val_loss: 1.0552
Epoch 68/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8517 - val_loss: 1.0543
Epoch 69/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8514 - val_loss: 1.0514
Epoch 70/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8477 - val_loss: 1.0498
Epoch 71/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8458 - val_loss: 1.0474
Epoch 72/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8437 - val_loss: 1.0464
Epoch 73/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8411 - val_loss: 1.0452
Epoch 74/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8389 - val_loss: 1.0442
Epoch 75/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8369 - val_loss: 1.0433
Epoch 76/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8352 - val_loss: 1.0416
Epoch 77/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8331 - val_loss: 1.0398
Epoch 78/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8321 - val_loss: 1.0396
Epoch 79/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8308 - val_loss: 1.0376
Epoch 80/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8285 - val_loss: 1.0356
Epoch 81/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8267 - val_loss: 1.0331
Epoch 82/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8246 - val_loss: 1.0314
Epoch 83/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8235 - val_loss: 1.0278
Epoch 84/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8203 - val_loss: 1.0272
Epoch 85/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8187 - val_loss: 1.0254
Epoch 86/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8173 - val_loss: 1.0239
Epoch 87/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8149 - val_loss: 1.0222
Epoch 88/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8134 - val_loss: 1.0223
Epoch 89/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8128 - val_loss: 1.0188
Epoch 90/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8101 - val_loss: 1.0181
Epoch 91/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8093 - val_loss: 1.0177
Epoch 92/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8076 - val_loss: 1.0158
Epoch 93/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8058 - val_loss: 1.0147
Epoch 94/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8048 - val_loss: 1.0136
Epoch 95/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8038 - val_loss: 1.0134
Epoch 96/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8022 - val_loss: 1.0115
Epoch 97/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.8003 - val_loss: 1.0105
Epoch 98/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7984 - val_loss: 1.0100
Epoch 99/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7978 - val_loss: 1.0097
Epoch 100/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7959 - val_loss: 1.0086
Epoch 101/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7941 - val_loss: 1.0074
Epoch 102/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7925 - val_loss: 1.0068
Epoch 103/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7914 - val_loss: 1.0062
Epoch 104/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7903 - val_loss: 1.0051
Epoch 105/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7887 - val_loss: 1.0040
Epoch 106/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7876 - val_loss: 1.0024
Epoch 107/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7860 - val_loss: 1.0012
Epoch 108/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7857 - val_loss: 1.0001
Epoch 109/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7833 - val_loss: 0.9990
Epoch 110/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7830 - val_loss: 0.9986
Epoch 111/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7814 - val_loss: 0.9973
Epoch 112/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7801 - val_loss: 0.9969
Epoch 113/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7782 - val_loss: 0.9971
Epoch 114/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7773 - val_loss: 0.9964
Epoch 115/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7760 - val_loss: 0.9958
Epoch 116/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7749 - val_loss: 0.9963
Epoch 117/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7743 - val_loss: 0.9962
Epoch 118/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7732 - val_loss: 0.9937
Epoch 119/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7714 - val_loss: 0.9931
Epoch 120/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7696 - val_loss: 0.9927
Epoch 121/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7685 - val_loss: 0.9918
Epoch 122/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7678 - val_loss: 0.9910
Epoch 123/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7675 - val_loss: 0.9905
Epoch 124/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7651 - val_loss: 0.9897
Epoch 125/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7645 - val_loss: 0.9873
Epoch 126/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7631 - val_loss: 0.9844
Epoch 127/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7621 - val_loss: 0.9836
Epoch 128/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7609 - val_loss: 0.9837
Epoch 129/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7601 - val_loss: 0.9840
Epoch 130/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7587 - val_loss: 0.9844
Epoch 131/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7576 - val_loss: 0.9838
Epoch 132/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7571 - val_loss: 0.9836
Epoch 133/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7557 - val_loss: 0.9831
Epoch 134/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7546 - val_loss: 0.9820
Epoch 135/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7533 - val_loss: 0.9811
Epoch 136/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7519 - val_loss: 0.9800
Epoch 137/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7508 - val_loss: 0.9798
Epoch 138/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7499 - val_loss: 0.9785
Epoch 139/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7493 - val_loss: 0.9782
Epoch 140/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7484 - val_loss: 0.9778
Epoch 141/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7471 - val_loss: 0.9759
Epoch 142/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7461 - val_loss: 0.9756
Epoch 143/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7463 - val_loss: 0.9755
Epoch 144/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7449 - val_loss: 0.9726
Epoch 145/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7442 - val_loss: 0.9725
Epoch 146/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7423 - val_loss: 0.9724
Epoch 147/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7411 - val_loss: 0.9724
Epoch 148/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7404 - val_loss: 0.9728
Epoch 149/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7392 - val_loss: 0.9723
Epoch 150/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7391 - val_loss: 0.9721
Epoch 151/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7374 - val_loss: 0.9704
Epoch 152/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7366 - val_loss: 0.9711
Epoch 153/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7358 - val_loss: 0.9713
Epoch 154/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7354 - val_loss: 0.9718
Epoch 155/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7346 - val_loss: 0.9709
Epoch 156/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7334 - val_loss: 0.9713
Epoch 157/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7328 - val_loss: 0.9696
Epoch 158/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7312 - val_loss: 0.9689
Epoch 159/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7309 - val_loss: 0.9657
Epoch 160/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7304 - val_loss: 0.9657
Epoch 161/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7290 - val_loss: 0.9653
Epoch 162/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7279 - val_loss: 0.9653
Epoch 163/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7270 - val_loss: 0.9638
Epoch 164/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7265 - val_loss: 0.9626
Epoch 165/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7257 - val_loss: 0.9623
Epoch 166/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7246 - val_loss: 0.9620
Epoch 167/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7233 - val_loss: 0.9611
Epoch 168/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7233 - val_loss: 0.9615
Epoch 169/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7226 - val_loss: 0.9601
Epoch 170/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7214 - val_loss: 0.9598
Epoch 171/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7208 - val_loss: 0.9597
Epoch 172/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7192 - val_loss: 0.9607
Epoch 173/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7189 - val_loss: 0.9621
Epoch 174/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7186 - val_loss: 0.9609
Epoch 175/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7172 - val_loss: 0.9603
Epoch 176/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7165 - val_loss: 0.9603
Epoch 177/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7159 - val_loss: 0.9589
Epoch 178/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7148 - val_loss: 0.9574
Epoch 179/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7141 - val_loss: 0.9586
Epoch 180/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7133 - val_loss: 0.9578
Epoch 181/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7123 - val_loss: 0.9559
Epoch 182/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7115 - val_loss: 0.9561
Epoch 183/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7113 - val_loss: 0.9555
Epoch 184/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7100 - val_loss: 0.9555
Epoch 185/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7092 - val_loss: 0.9561
Epoch 186/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7083 - val_loss: 0.9563
Epoch 187/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7079 - val_loss: 0.9550
Epoch 188/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7071 - val_loss: 0.9546
Epoch 189/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7061 - val_loss: 0.9549
Epoch 190/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7061 - val_loss: 0.9535
Epoch 191/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7043 - val_loss: 0.9515
Epoch 192/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.7039 - val_loss: 0.9519
Epoch 193/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7036 - val_loss: 0.9508
Epoch 194/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7024 - val_loss: 0.9491
Epoch 195/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7017 - val_loss: 0.9484
Epoch 196/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7010 - val_loss: 0.9481
Epoch 197/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7002 - val_loss: 0.9479
Epoch 198/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6995 - val_loss: 0.9466
Epoch 199/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.6985 - val_loss: 0.9470
Epoch 200/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6985 - val_loss: 0.9462
Epoch 201/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6973 - val_loss: 0.9466
Epoch 202/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6971 - val_loss: 0.9459
Epoch 203/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6959 - val_loss: 0.9460
Epoch 204/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6948 - val_loss: 0.9463
Epoch 205/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6942 - val_loss: 0.9453
Epoch 206/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6938 - val_loss: 0.9453
Epoch 207/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6928 - val_loss: 0.9449
Epoch 208/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6925 - val_loss: 0.9448
Epoch 209/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6911 - val_loss: 0.9454
Epoch 210/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6910 - val_loss: 0.9447
Epoch 211/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.6902 - val_loss: 0.9444
Epoch 212/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6901 - val_loss: 0.9434
Epoch 213/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.6890 - val_loss: 0.9428
Epoch 214/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6881 - val_loss: 0.9411
Epoch 215/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6871 - val_loss: 0.9399
Epoch 216/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6874 - val_loss: 0.9393
Epoch 217/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6857 - val_loss: 0.9395
Epoch 218/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6854 - val_loss: 0.9394
Epoch 219/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6846 - val_loss: 0.9396
Epoch 220/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6841 - val_loss: 0.9391
Epoch 221/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6835 - val_loss: 0.9388
Epoch 222/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6826 - val_loss: 0.9386
Epoch 223/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6817 - val_loss: 0.9384
Epoch 224/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6810 - val_loss: 0.9374
Epoch 225/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6809 - val_loss: 0.9377
Epoch 226/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.6796 - val_loss: 0.9383
Epoch 227/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6791 - val_loss: 0.9375
Epoch 228/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6789 - val_loss: 0.9373
Epoch 229/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6780 - val_loss: 0.9363
Epoch 230/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6771 - val_loss: 0.9371
Epoch 231/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6766 - val_loss: 0.9367
Epoch 232/1000
17/17 [==============================] - 0s 2ms/step - loss: 0.6756 - val_loss: 0.9379
Epoch 233/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6751 - val_loss: 0.9371
Epoch 234/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6745 - val_loss: 0.9367
Epoch 235/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6734 - val_loss: 0.9374
Epoch 236/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6731 - val_loss: 0.9375
Epoch 237/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6725 - val_loss: 0.9377
Epoch 238/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6719 - val_loss: 0.9389
Epoch 239/1000
 1/17 [>.............................] - ETA: 0s - loss: 0.6955Restoring model weights from the end of the best epoch: 229.
17/17 [==============================] - 0s 3ms/step - loss: 0.6719 - val_loss: 0.9383
Epoch 239: early stopping
Wall time: 13.1 s
In [87]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
plot_loss(history)
In [88]:
y_pred_mlp_5 = mlp_5.predict(x_test)
9/9 [==============================] - 0s 813us/step
In [89]:
a = plt.axes(aspect='equal')
plt.scatter(y_test,y_pred_mlp_5 )
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [90]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, mlp_5.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'mlp_5')
9/9 [==============================] - 0s 2ms/step
In [91]:
df_mlp_5 = error(mlp_5, x_train, x_test, y_train, y_test,
    name = 'mlp_5', trg = 'Соотношение матрица-наполнитель')
df_mlp_5
9/9 [==============================] - 0s 2ms/step
9/9 [==============================] - 0s 0s/step
21/21 [==============================] - 0s 781us/step
21/21 [==============================] - 0s 1ms/step
9/9 [==============================] - 0s 2ms/step
Out[91]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_5 Соотношение матрица-наполнитель 0.790174 0.689722 0.905471 0.728034 -0.184461
In [92]:
def build_and_compile_model(normalizer):
    model_6 = keras.Sequential([
      normalizer,
      layers.Dense(64, activation='relu'),
      layers.Dense(128, activation='relu'),
      layers.Dense(256, activation='relu'),  
      layers.Dense(1)
    ])

    model_6.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                loss='mean_squared_error')
    return model_6
In [93]:
mlp_6 = build_and_compile_model(normalizer)
mlp_6.summary()
Model: "sequential_9"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_22 (Dense)            (None, 64)                832       
                                                                 
 dense_23 (Dense)            (None, 128)               8320      
                                                                 
 dense_24 (Dense)            (None, 256)               33024     
                                                                 
 dense_25 (Dense)            (None, 1)                 257       
                                                                 
=================================================================
Total params: 42,458
Trainable params: 42,433
Non-trainable params: 25
_________________________________________________________________
In [94]:
%%time
history = mlp_6.fit(
    x_train,
    y_train,
    epochs=1000,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/1000
17/17 [==============================] - 1s 11ms/step - loss: 3.2542 - val_loss: 1.6618
Epoch 2/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.1777 - val_loss: 1.2899
Epoch 3/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9948 - val_loss: 1.2303
Epoch 4/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.9375 - val_loss: 1.1950
Epoch 5/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8970 - val_loss: 1.1804
Epoch 6/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8774 - val_loss: 1.1596
Epoch 7/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8322 - val_loss: 1.2190
Epoch 8/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7786 - val_loss: 1.1711
Epoch 9/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7514 - val_loss: 1.2009
Epoch 10/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7249 - val_loss: 1.1794
Epoch 11/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6630 - val_loss: 1.1405
Epoch 12/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6315 - val_loss: 1.1941
Epoch 13/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6015 - val_loss: 1.1469
Epoch 14/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5747 - val_loss: 1.2254
Epoch 15/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5477 - val_loss: 1.2240
Epoch 16/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.5061 - val_loss: 1.2223
Epoch 17/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.4873 - val_loss: 1.2510
Epoch 18/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.4339 - val_loss: 1.2079
Epoch 19/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.3969 - val_loss: 1.2160
Epoch 20/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.3701 - val_loss: 1.2347
Epoch 21/1000
 1/17 [>.............................] - ETA: 0s - loss: 0.2126Restoring model weights from the end of the best epoch: 11.
17/17 [==============================] - 0s 3ms/step - loss: 0.3638 - val_loss: 1.2471
Epoch 21: early stopping
Wall time: 2.02 s
In [95]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
plot_loss(history)
In [96]:
y_pred_mlp_6 = mlp_6.predict(x_test)
9/9 [==============================] - 0s 2ms/step
In [97]:
a = plt.axes(aspect='equal')
plt.scatter(y_test,y_pred_mlp_6  )
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [98]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, mlp_6.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'mlp_6')
9/9 [==============================] - 0s 2ms/step
In [99]:
df_mlp_6 = error(mlp_6, x_train, x_test, y_train, y_test,
    name = 'mlp_6', trg = 'Соотношение матрица-наполнитель')
df_mlp_6
9/9 [==============================] - 0s 0s/step
9/9 [==============================] - 0s 2ms/step
21/21 [==============================] - 0s 779us/step
21/21 [==============================] - 0s 780us/step
9/9 [==============================] - 0s 814us/step
Out[99]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_6 Соотношение матрица-наполнитель 0.881052 0.688248 1.13512 0.731736 -0.484868
In [100]:
df_mlp_result = pd.concat([df_mlp_1, df_mlp_2, df_mlp_3, df_mlp_4, df_mlp_5, df_mlp_6, ], axis=0).reset_index(drop = True)
df_mlp_result
Out[100]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_1 Соотношение матрица-наполнитель 0.842960 0.669361 1.039481 0.699295 -0.359761
1 mlp_2 Соотношение матрица-наполнитель 0.861006 0.671457 1.105257 0.699097 -0.445803
2 mlp_3 Соотношение матрица-наполнитель 0.868866 0.560671 1.120060 0.513567 -0.465168
3 mlp_4 Соотношение матрица-наполнитель 0.821671 0.714738 0.987026 0.798285 -0.291144
4 mlp_5 Соотношение матрица-наполнитель 0.790174 0.689722 0.905471 0.728034 -0.184461
5 mlp_6 Соотношение матрица-наполнитель 0.881052 0.688248 1.135120 0.731736 -0.484868

Добавим слой дропаут¶

In [101]:
def build_and_compile_model(normalizer):
    model_7 = keras.Sequential([
      normalizer,
      layers.Dense(64, activation='relu'),
      layers.Dropout(0.1),
      layers.Dense(128, activation='relu'),
      layers.Dropout(0.1),  
      layers.Dense(256, activation='relu'),
      layers.Dropout(0.1),  
      layers.Dense(1)
    ])

    model_7.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                loss='mean_squared_error')
    return model_7
In [102]:
mlp_7 = build_and_compile_model(normalizer)
mlp_7.summary()
Model: "sequential_10"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_26 (Dense)            (None, 64)                832       
                                                                 
 dropout (Dropout)           (None, 64)                0         
                                                                 
 dense_27 (Dense)            (None, 128)               8320      
                                                                 
 dropout_1 (Dropout)         (None, 128)               0         
                                                                 
 dense_28 (Dense)            (None, 256)               33024     
                                                                 
 dropout_2 (Dropout)         (None, 256)               0         
                                                                 
 dense_29 (Dense)            (None, 1)                 257       
                                                                 
=================================================================
Total params: 42,458
Trainable params: 42,433
Non-trainable params: 25
_________________________________________________________________
In [103]:
%%time
history = mlp_7.fit(
    x_train,
    y_train,
    epochs=1000,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/1000
17/17 [==============================] - 1s 11ms/step - loss: 3.5270 - val_loss: 1.6140
Epoch 2/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.2996 - val_loss: 1.4228
Epoch 3/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.1807 - val_loss: 1.2416
Epoch 4/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.0723 - val_loss: 1.2019
Epoch 5/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.0137 - val_loss: 1.1725
Epoch 6/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9556 - val_loss: 1.1681
Epoch 7/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9301 - val_loss: 1.1604
Epoch 8/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9175 - val_loss: 1.1452
Epoch 9/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8491 - val_loss: 1.1137
Epoch 10/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8499 - val_loss: 1.0661
Epoch 11/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8023 - val_loss: 1.0657
Epoch 12/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7893 - val_loss: 1.1422
Epoch 13/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7911 - val_loss: 1.0963
Epoch 14/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7345 - val_loss: 1.1656
Epoch 15/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6650 - val_loss: 1.0726
Epoch 16/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6976 - val_loss: 1.0749
Epoch 17/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6398 - val_loss: 1.1275
Epoch 18/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6601 - val_loss: 1.1018
Epoch 19/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.5923 - val_loss: 1.1286
Epoch 20/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6356 - val_loss: 1.0930
Epoch 21/1000
 1/17 [>.............................] - ETA: 0s - loss: 0.5557Restoring model weights from the end of the best epoch: 11.
17/17 [==============================] - 0s 3ms/step - loss: 0.6443 - val_loss: 1.2064
Epoch 21: early stopping
Wall time: 2.45 s
In [104]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
plot_loss(history)
In [105]:
y_pred_mlp_7 = mlp_7.predict(x_test)
9/9 [==============================] - 0s 2ms/step
In [106]:
a = plt.axes(aspect='equal')
plt.scatter(y_test, y_pred_mlp_7)
plt.xlabel('True Values')
plt.ylabel('Predictions')
lims = [0, 5]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
In [107]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, mlp_7.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'mlp_7')
9/9 [==============================] - 0s 2ms/step
In [108]:
df_mlp_7 = error(mlp_7, x_train, x_test, y_train, y_test,
    name = 'mlp_7', trg = 'Соотношение матрица-наполнитель')
df_mlp_7
9/9 [==============================] - 0s 2ms/step
9/9 [==============================] - 0s 2ms/step
21/21 [==============================] - 0s 782us/step
21/21 [==============================] - 0s 1ms/step
9/9 [==============================] - 0s 2ms/step
Out[108]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_7 Соотношение матрица-наполнитель 0.817151 0.689195 0.992198 0.745503 -0.297909

Изменим функцию активации в скрытом слое¶

In [109]:
def build_and_compile_model(normalizer):
    model_9 = keras.Sequential([
      normalizer,
      layers.Dense(64, activation='relu'),
      layers.Dropout(0.1),
      layers.Dense(128, activation='sigmoid'),
      layers.Dropout(0.1),  
      layers.Dense(256, activation='relu'),
      layers.Dropout(0.1),  
      layers.Dense(1)
    ])

    model_9.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                loss='mean_squared_error')
    return model_9
In [110]:
mlp_9 = build_and_compile_model(normalizer)
mlp_9.summary()
Model: "sequential_11"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_30 (Dense)            (None, 64)                832       
                                                                 
 dropout_3 (Dropout)         (None, 64)                0         
                                                                 
 dense_31 (Dense)            (None, 128)               8320      
                                                                 
 dropout_4 (Dropout)         (None, 128)               0         
                                                                 
 dense_32 (Dense)            (None, 256)               33024     
                                                                 
 dropout_5 (Dropout)         (None, 256)               0         
                                                                 
 dense_33 (Dense)            (None, 1)                 257       
                                                                 
=================================================================
Total params: 42,458
Trainable params: 42,433
Non-trainable params: 25
_________________________________________________________________
In [111]:
%%time
history = mlp_9.fit(
    x_train,
    y_train,
    epochs=1000,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/1000
17/17 [==============================] - 1s 11ms/step - loss: 2.1964 - val_loss: 0.8801
Epoch 2/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8989 - val_loss: 0.8363
Epoch 3/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8646 - val_loss: 0.8492
Epoch 4/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8634 - val_loss: 0.8364
Epoch 5/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8803 - val_loss: 0.8636
Epoch 6/1000
17/17 [==============================] - 0s 5ms/step - loss: 0.8611 - val_loss: 0.9325
Epoch 7/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8491 - val_loss: 0.8525
Epoch 8/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.8506 - val_loss: 0.8452
Epoch 9/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8134 - val_loss: 0.9518
Epoch 10/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8722 - val_loss: 0.8844
Epoch 11/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8331 - val_loss: 0.8493
Epoch 12/1000
 1/17 [>.............................] - ETA: 0s - loss: 0.6473Restoring model weights from the end of the best epoch: 2.
17/17 [==============================] - 0s 4ms/step - loss: 0.8060 - val_loss: 0.8494
Epoch 12: early stopping
Wall time: 1.74 s
In [112]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
plot_loss(history)
In [113]:
y_pred_mlp_9 = mlp_9.predict(x_test)
9/9 [==============================] - 0s 2ms/step
In [114]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, mlp_9.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'mlp_9')
9/9 [==============================] - 0s 0s/step
In [115]:
df_mlp_9 = error(mlp_9, x_train, x_test, y_train, y_test,
    name = 'mlp_9', trg = 'Соотношение матрица-наполнитель')
df_mlp_9
9/9 [==============================] - 0s 2ms/step
9/9 [==============================] - 0s 0s/step
21/21 [==============================] - 0s 781us/step
21/21 [==============================] - 0s 1ms/step
9/9 [==============================] - 0s 0s/step
Out[115]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_9 Соотношение матрица-наполнитель 0.713825 0.727068 0.776962 0.822633 -0.016355
In [ ]:
 
In [116]:
def build_and_compile_model(normalizer):
    model_10 = keras.Sequential([
      normalizer,
      layers.Dense(64, activation='softmax'),
      layers.Dropout(0.1),
      layers.Dense(128, activation='sigmoid'),
      layers.Dropout(0.1),  
      layers.Dense(256, activation='relu'),
      layers.Dropout(0.1),  
      layers.Dense(1)
    ])

    model_10.compile(optimizer=tf.keras.optimizers.SGD(0.001),
                loss='mean_squared_error')
    return model_10
In [117]:
mlp_10 = build_and_compile_model(normalizer)
mlp_10.summary()
Model: "sequential_12"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_34 (Dense)            (None, 64)                832       
                                                                 
 dropout_6 (Dropout)         (None, 64)                0         
                                                                 
 dense_35 (Dense)            (None, 128)               8320      
                                                                 
 dropout_7 (Dropout)         (None, 128)               0         
                                                                 
 dense_36 (Dense)            (None, 256)               33024     
                                                                 
 dropout_8 (Dropout)         (None, 256)               0         
                                                                 
 dense_37 (Dense)            (None, 1)                 257       
                                                                 
=================================================================
Total params: 42,458
Trainable params: 42,433
Non-trainable params: 25
_________________________________________________________________
In [118]:
%%time
history = mlp_10.fit(
    x_train,
    y_train,
    epochs=1000,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/1000
17/17 [==============================] - 1s 10ms/step - loss: 4.5760 - val_loss: 1.2384
Epoch 2/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9784 - val_loss: 0.8680
Epoch 3/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8747 - val_loss: 0.8330
Epoch 4/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8697 - val_loss: 0.8332
Epoch 5/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8782 - val_loss: 0.8335
Epoch 6/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8643 - val_loss: 0.8406
Epoch 7/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8690 - val_loss: 0.8339
Epoch 8/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8332 - val_loss: 0.8356
Epoch 9/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8743 - val_loss: 0.8362
Epoch 10/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9268 - val_loss: 0.8339
Epoch 11/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8798 - val_loss: 0.8372
Epoch 12/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8735 - val_loss: 0.8357
Epoch 13/1000
 1/17 [>.............................] - ETA: 0s - loss: 1.3450Restoring model weights from the end of the best epoch: 3.
17/17 [==============================] - 0s 5ms/step - loss: 0.8837 - val_loss: 0.8346
Epoch 13: early stopping
Wall time: 1.71 s
In [119]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
plot_loss(history)
In [120]:
y_pred_mlp_10 = mlp_10.predict(x_test)
9/9 [==============================] - 0s 2ms/step
In [121]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, mlp_10.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'mlp_10')
9/9 [==============================] - 0s 0s/step
In [122]:
df_mlp_10 = error(mlp_10, x_train, x_test, y_train, y_test,
    name = 'mlp_10', trg = 'Соотношение матрица-наполнитель')
df_mlp_10
9/9 [==============================] - 0s 2ms/step
9/9 [==============================] - 0s 2ms/step
21/21 [==============================] - 0s 2ms/step
21/21 [==============================] - 0s 779us/step
9/9 [==============================] - 0s 2ms/step
Out[122]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_10 Соотношение матрица-наполнитель 0.710387 0.721993 0.764572 0.816566 -0.000148
In [ ]:
 
In [ ]:
 
In [123]:
df_END = pd.concat([df_result, df_mlp_result, df_mlp_7,df_mlp_9,df_mlp_10], axis=0).reset_index(drop = True)
df_END
Out[123]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 model_1 Соотношение матрица-наполнитель 0.718846 0.727980 0.769704 0.832339 -0.006862
1 model_2 Соотношение матрица-наполнитель 0.711121 0.741844 0.756206 0.854804 0.010795
2 model_3 Соотношение матрица-наполнитель 0.746268 0.751420 0.828954 0.881878 -0.084367
3 model_4 Соотношение матрица-наполнитель 0.715961 0.719260 0.775988 0.809827 -0.015082
4 mlp_1 Соотношение матрица-наполнитель 0.842960 0.669361 1.039481 0.699295 -0.359761
5 mlp_2 Соотношение матрица-наполнитель 0.861006 0.671457 1.105257 0.699097 -0.445803
6 mlp_3 Соотношение матрица-наполнитель 0.868866 0.560671 1.120060 0.513567 -0.465168
7 mlp_4 Соотношение матрица-наполнитель 0.821671 0.714738 0.987026 0.798285 -0.291144
8 mlp_5 Соотношение матрица-наполнитель 0.790174 0.689722 0.905471 0.728034 -0.184461
9 mlp_6 Соотношение матрица-наполнитель 0.881052 0.688248 1.135120 0.731736 -0.484868
10 mlp_7 Соотношение матрица-наполнитель 0.817151 0.689195 0.992198 0.745503 -0.297909
11 mlp_9 Соотношение матрица-наполнитель 0.713825 0.727068 0.776962 0.822633 -0.016355
12 mlp_10 Соотношение матрица-наполнитель 0.710387 0.721993 0.764572 0.816566 -0.000148

Метод GridSearchCV для нейросети¶

In [124]:
def create_model_GSCV(lyrs=[32], act='softmax', optimizer='adam', dr=0.1):
    
    seed = 7
    np.random.seed(seed)
    tf.random.set_seed(seed)
    
    model_GSCV = Sequential()
    model_GSCV.add(Dense(lyrs[0], input_dim=x_train.shape[1], activation=act)) 
    for i in range(1,len(lyrs)):
        model_GSCV.add(Dense(lyrs[i], activation=act))
    
    model_GSCV.add(Dropout(dr))
    model_GSCV.add(Dense(1))  # выходной слой
    
    model_GSCV.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])
 
    return model_GSCV

Ищем оптимальное число epochs и размер batch_size¶

In [125]:
model_GSCV= KerasRegressor(build_fn=create_model_GSCV, verbose=0)

batch_size = [10, 20, 30, 40, 50]
epochs = [10, 50, 100]

param_grid = dict(batch_size=batch_size, epochs=epochs)

grid = GridSearchCV(estimator=model_GSCV, param_grid=param_grid, n_jobs=1,cv=10)
grid_result = grid.fit(x_train, y_train)

#summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
WARNING:tensorflow:5 out of the last 14 calls to <function Model.make_test_function.<locals>.test_function at 0x000002A1889A8280> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.
WARNING:tensorflow:5 out of the last 13 calls to <function Model.make_test_function.<locals>.test_function at 0x000002A188B3B4C0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.
Best: -0.821224 using {'batch_size': 10, 'epochs': 100}
-3.299298 (0.447570) with: {'batch_size': 10, 'epochs': 10}
-0.831215 (0.068940) with: {'batch_size': 10, 'epochs': 50}
-0.821224 (0.062896) with: {'batch_size': 10, 'epochs': 100}
-5.717728 (1.300960) with: {'batch_size': 20, 'epochs': 10}
-1.164103 (0.209991) with: {'batch_size': 20, 'epochs': 50}
-0.834081 (0.064314) with: {'batch_size': 20, 'epochs': 100}
-6.233018 (1.099995) with: {'batch_size': 30, 'epochs': 10}
-2.137219 (0.574105) with: {'batch_size': 30, 'epochs': 50}
-0.899505 (0.083991) with: {'batch_size': 30, 'epochs': 100}
-7.115469 (1.003307) with: {'batch_size': 40, 'epochs': 10}
-2.738080 (0.668054) with: {'batch_size': 40, 'epochs': 50}
-1.224353 (0.172646) with: {'batch_size': 40, 'epochs': 100}
-7.024797 (0.965049) with: {'batch_size': 50, 'epochs': 10}
-3.962270 (0.893006) with: {'batch_size': 50, 'epochs': 50}
-1.490331 (0.196473) with: {'batch_size': 50, 'epochs': 100}

Ищем optimizer¶

In [126]:
model_GSCV = KerasRegressor(build_fn=create_model_GSCV, epochs=100, batch_size=10, verbose=0)

optimizer = ['SGD', 'RMSprop',  'Adam', ]
param_grid = dict(optimizer=optimizer)

grid = GridSearchCV(estimator=model_GSCV, param_grid=param_grid, cv=10, verbose=2)
grid_result = grid.fit(x_train, y_train)
Fitting 10 folds for each of 3 candidates, totalling 30 fits
[CV] END ......................................optimizer=SGD; total time=   6.1s
[CV] END ......................................optimizer=SGD; total time=   6.2s
[CV] END ......................................optimizer=SGD; total time=   6.4s
[CV] END ......................................optimizer=SGD; total time=   6.7s
[CV] END ......................................optimizer=SGD; total time=   6.0s
[CV] END ......................................optimizer=SGD; total time=   6.4s
[CV] END ......................................optimizer=SGD; total time=   6.3s
[CV] END ......................................optimizer=SGD; total time=   6.6s
[CV] END ......................................optimizer=SGD; total time=   6.5s
[CV] END ......................................optimizer=SGD; total time=   6.2s
[CV] END ..................................optimizer=RMSprop; total time=   5.9s
[CV] END ..................................optimizer=RMSprop; total time=   6.0s
[CV] END ..................................optimizer=RMSprop; total time=   5.8s
[CV] END ..................................optimizer=RMSprop; total time=   6.0s
[CV] END ..................................optimizer=RMSprop; total time=   5.9s
[CV] END ..................................optimizer=RMSprop; total time=   6.3s
[CV] END ..................................optimizer=RMSprop; total time=   6.5s
[CV] END ..................................optimizer=RMSprop; total time=   6.8s
[CV] END ..................................optimizer=RMSprop; total time=   6.7s
[CV] END ..................................optimizer=RMSprop; total time=   6.5s
[CV] END .....................................optimizer=Adam; total time=   6.0s
[CV] END .....................................optimizer=Adam; total time=   6.3s
[CV] END .....................................optimizer=Adam; total time=   6.4s
[CV] END .....................................optimizer=Adam; total time=   6.4s
[CV] END .....................................optimizer=Adam; total time=   6.7s
[CV] END .....................................optimizer=Adam; total time=   7.5s
[CV] END .....................................optimizer=Adam; total time=   6.7s
[CV] END .....................................optimizer=Adam; total time=   7.0s
[CV] END .....................................optimizer=Adam; total time=   6.8s
[CV] END .....................................optimizer=Adam; total time=   7.5s
In [127]:
# результаты
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
Best: -0.821385 using {'optimizer': 'RMSprop'}
-0.824207 (0.061265) with: {'optimizer': 'SGD'}
-0.821385 (0.063087) with: {'optimizer': 'RMSprop'}
-0.822261 (0.065578) with: {'optimizer': 'Adam'}

Ищем колличество слоев¶

In [128]:
model_GSCV = KerasRegressor(build_fn=create_model_GSCV, epochs=100, batch_size=10, verbose=0)

layers = [[64, 64], [32, 64, 128], [64, 128, 256]]
param_grid = dict(lyrs=layers)

grid = GridSearchCV(estimator=model_GSCV, param_grid=param_grid, cv=10, verbose=2)
grid_result = grid.fit(x_train, y_train)
Fitting 10 folds for each of 3 candidates, totalling 30 fits
[CV] END ......................................lyrs=[64, 64]; total time=   7.0s
[CV] END ......................................lyrs=[64, 64]; total time=   7.0s
[CV] END ......................................lyrs=[64, 64]; total time=   6.8s
[CV] END ......................................lyrs=[64, 64]; total time=   6.8s
[CV] END ......................................lyrs=[64, 64]; total time=   7.0s
[CV] END ......................................lyrs=[64, 64]; total time=   9.5s
[CV] END ......................................lyrs=[64, 64]; total time=   7.8s
[CV] END ......................................lyrs=[64, 64]; total time=   7.7s
[CV] END ......................................lyrs=[64, 64]; total time=   7.7s
[CV] END ......................................lyrs=[64, 64]; total time=   7.8s
[CV] END .................................lyrs=[32, 64, 128]; total time=   8.4s
[CV] END .................................lyrs=[32, 64, 128]; total time=   8.1s
[CV] END .................................lyrs=[32, 64, 128]; total time=   8.3s
[CV] END .................................lyrs=[32, 64, 128]; total time=   8.1s
[CV] END .................................lyrs=[32, 64, 128]; total time=   7.7s
[CV] END .................................lyrs=[32, 64, 128]; total time=   8.6s
[CV] END .................................lyrs=[32, 64, 128]; total time=   8.2s
[CV] END .................................lyrs=[32, 64, 128]; total time=   8.5s
[CV] END .................................lyrs=[32, 64, 128]; total time=   8.4s
[CV] END .................................lyrs=[32, 64, 128]; total time=   8.5s
[CV] END ................................lyrs=[64, 128, 256]; total time=   8.9s
[CV] END ................................lyrs=[64, 128, 256]; total time=   8.7s
[CV] END ................................lyrs=[64, 128, 256]; total time=   8.4s
[CV] END ................................lyrs=[64, 128, 256]; total time=   8.6s
[CV] END ................................lyrs=[64, 128, 256]; total time=   8.9s
[CV] END ................................lyrs=[64, 128, 256]; total time=   9.1s
[CV] END ................................lyrs=[64, 128, 256]; total time=   9.6s
[CV] END ................................lyrs=[64, 128, 256]; total time=   9.0s
[CV] END ................................lyrs=[64, 128, 256]; total time=  10.9s
[CV] END ................................lyrs=[64, 128, 256]; total time=  10.0s
In [129]:
# результаты
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
Best: -0.817829 using {'lyrs': [64, 128, 256]}
-0.818339 (0.061469) with: {'lyrs': [64, 64]}
-0.818046 (0.061630) with: {'lyrs': [32, 64, 128]}
-0.817829 (0.061702) with: {'lyrs': [64, 128, 256]}

Ищем оптимальные параметры для слоев Dropout¶

In [130]:
model_GSCV =  KerasRegressor(build_fn=create_model_GSCV, epochs=100, batch_size=10, verbose=0)

drops = [0.0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.5]
param_grid = dict(dr=drops)

grid = GridSearchCV(estimator=model_GSCV, param_grid=param_grid, cv=10, verbose=2)
grid_result = grid.fit(x_train, y_train)
Fitting 10 folds for each of 7 candidates, totalling 70 fits
[CV] END .............................................dr=0.0; total time=   6.2s
[CV] END .............................................dr=0.0; total time=   6.6s
[CV] END .............................................dr=0.0; total time=   6.2s
[CV] END .............................................dr=0.0; total time=   6.7s
[CV] END .............................................dr=0.0; total time=   6.2s
[CV] END .............................................dr=0.0; total time=   8.4s
[CV] END .............................................dr=0.0; total time=   7.0s
[CV] END .............................................dr=0.0; total time=   6.5s
[CV] END .............................................dr=0.0; total time=   6.1s
[CV] END .............................................dr=0.0; total time=   7.6s
[CV] END ............................................dr=0.01; total time=   6.1s
[CV] END ............................................dr=0.01; total time=   6.9s
[CV] END ............................................dr=0.01; total time=   6.8s
[CV] END ............................................dr=0.01; total time=   6.0s
[CV] END ............................................dr=0.01; total time=   6.3s
[CV] END ............................................dr=0.01; total time=   6.4s
[CV] END ............................................dr=0.01; total time=   6.9s
[CV] END ............................................dr=0.01; total time=   7.2s
[CV] END ............................................dr=0.01; total time=   7.2s
[CV] END ............................................dr=0.01; total time=   7.0s
[CV] END ............................................dr=0.05; total time=   6.2s
[CV] END ............................................dr=0.05; total time=   6.3s
[CV] END ............................................dr=0.05; total time=   6.2s
[CV] END ............................................dr=0.05; total time=   6.6s
[CV] END ............................................dr=0.05; total time=   6.8s
[CV] END ............................................dr=0.05; total time=   7.4s
[CV] END ............................................dr=0.05; total time=   6.9s
[CV] END ............................................dr=0.05; total time=   6.5s
[CV] END ............................................dr=0.05; total time=   7.2s
[CV] END ............................................dr=0.05; total time=   7.0s
[CV] END .............................................dr=0.1; total time=   6.7s
[CV] END .............................................dr=0.1; total time=   6.7s
[CV] END .............................................dr=0.1; total time=   6.6s
[CV] END .............................................dr=0.1; total time=   6.6s
[CV] END .............................................dr=0.1; total time=   5.9s
[CV] END .............................................dr=0.1; total time=   6.6s
[CV] END .............................................dr=0.1; total time=   6.3s
[CV] END .............................................dr=0.1; total time=   6.5s
[CV] END .............................................dr=0.1; total time=   6.6s
[CV] END .............................................dr=0.1; total time=   6.6s
[CV] END .............................................dr=0.2; total time=   6.4s
[CV] END .............................................dr=0.2; total time=   5.9s
[CV] END .............................................dr=0.2; total time=   6.0s
[CV] END .............................................dr=0.2; total time=   6.1s
[CV] END .............................................dr=0.2; total time=   5.9s
[CV] END .............................................dr=0.2; total time=   6.5s
[CV] END .............................................dr=0.2; total time=   6.2s
[CV] END .............................................dr=0.2; total time=   6.3s
[CV] END .............................................dr=0.2; total time=   6.8s
[CV] END .............................................dr=0.2; total time=   6.3s
[CV] END .............................................dr=0.3; total time=   6.2s
[CV] END .............................................dr=0.3; total time=   5.9s
[CV] END .............................................dr=0.3; total time=   5.9s
[CV] END .............................................dr=0.3; total time=   6.1s
[CV] END .............................................dr=0.3; total time=   5.9s
[CV] END .............................................dr=0.3; total time=   6.5s
[CV] END .............................................dr=0.3; total time=   6.4s
[CV] END .............................................dr=0.3; total time=   6.2s
[CV] END .............................................dr=0.3; total time=   6.5s
[CV] END .............................................dr=0.3; total time=   6.4s
[CV] END .............................................dr=0.5; total time=   6.1s
[CV] END .............................................dr=0.5; total time=   5.9s
[CV] END .............................................dr=0.5; total time=   5.9s
[CV] END .............................................dr=0.5; total time=   6.1s
[CV] END .............................................dr=0.5; total time=   5.9s
[CV] END .............................................dr=0.5; total time=   6.3s
[CV] END .............................................dr=0.5; total time=   6.4s
[CV] END .............................................dr=0.5; total time=   6.6s
[CV] END .............................................dr=0.5; total time=   6.5s
[CV] END .............................................dr=0.5; total time=   6.4s
In [131]:
# результаты
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
Best: -0.817912 using {'dr': 0.01}
-0.817945 (0.061471) with: {'dr': 0.0}
-0.817912 (0.061819) with: {'dr': 0.01}
-0.819497 (0.063015) with: {'dr': 0.05}
-0.821560 (0.062400) with: {'dr': 0.1}
-0.824801 (0.064003) with: {'dr': 0.2}
-0.824343 (0.064913) with: {'dr': 0.3}
-0.819914 (0.064602) with: {'dr': 0.5}
In [132]:
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Activation, Dense
from tensorflow.keras.layers import Dropout
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [133]:
def build_and_compile_model(normalizer):
    model_8 = keras.Sequential([
      normalizer,
      layers.Dense(64, activation='relu'),
      layers.Dropout(0.1),
      layers.Dense(128, activation='relu'),
      layers.Dropout(0.1),  
      layers.Dense(256, activation='relu'),
      layers.Dropout(0.1),  
      layers.Dense(1)
    ])

    model_8.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                loss='mean_squared_error')
    return model_8
In [134]:
mlp_8 = build_and_compile_model(normalizer)
mlp_8.summary()
Model: "sequential_297"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 normalization (Normalizatio  (None, 12)               25        
 n)                                                              
                                                                 
 dense_658 (Dense)           (None, 64)                832       
                                                                 
 dropout_293 (Dropout)       (None, 64)                0         
                                                                 
 dense_659 (Dense)           (None, 128)               8320      
                                                                 
 dropout_294 (Dropout)       (None, 128)               0         
                                                                 
 dense_660 (Dense)           (None, 256)               33024     
                                                                 
 dropout_295 (Dropout)       (None, 256)               0         
                                                                 
 dense_661 (Dense)           (None, 1)                 257       
                                                                 
=================================================================
Total params: 42,458
Trainable params: 42,433
Non-trainable params: 25
_________________________________________________________________
In [135]:
%%time
history = mlp_8.fit(
    x_train,
    y_train,
    epochs=1000,
    verbose=1,
    callbacks=[callback],
    validation_split=0.2)
Epoch 1/1000
17/17 [==============================] - 1s 14ms/step - loss: 3.7026 - val_loss: 1.6097
Epoch 2/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.3423 - val_loss: 1.3772
Epoch 3/1000
17/17 [==============================] - 0s 3ms/step - loss: 1.1513 - val_loss: 1.2087
Epoch 4/1000
17/17 [==============================] - 0s 4ms/step - loss: 1.0929 - val_loss: 1.2018
Epoch 5/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9873 - val_loss: 1.1109
Epoch 6/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9832 - val_loss: 1.1591
Epoch 7/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9400 - val_loss: 1.1270
Epoch 8/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9178 - val_loss: 1.0897
Epoch 9/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.9542 - val_loss: 1.0921
Epoch 10/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8216 - val_loss: 1.0907
Epoch 11/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.8521 - val_loss: 1.0188
Epoch 12/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7969 - val_loss: 1.0533
Epoch 13/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7642 - val_loss: 1.0745
Epoch 14/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7685 - val_loss: 1.0037
Epoch 15/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.7159 - val_loss: 1.0137
Epoch 16/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7069 - val_loss: 0.9710
Epoch 17/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.7123 - val_loss: 1.0350
Epoch 18/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.6500 - val_loss: 0.9726
Epoch 19/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6585 - val_loss: 1.0158
Epoch 20/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6813 - val_loss: 0.9774
Epoch 21/1000
17/17 [==============================] - 0s 3ms/step - loss: 0.5757 - val_loss: 1.0210
Epoch 22/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6101 - val_loss: 1.0184
Epoch 23/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6106 - val_loss: 1.0027
Epoch 24/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.6275 - val_loss: 1.0308
Epoch 25/1000
17/17 [==============================] - 0s 4ms/step - loss: 0.5650 - val_loss: 1.0220
Epoch 26/1000
 1/17 [>.............................] - ETA: 0s - loss: 0.5373Restoring model weights from the end of the best epoch: 16.
17/17 [==============================] - 0s 5ms/step - loss: 0.5613 - val_loss: 1.0978
Epoch 26: early stopping
Wall time: 2.84 s
In [136]:
def plot_loss(history, lim = [0, 10]):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim(lim)
    plt.xlabel('Epoch')
    plt.ylabel('MSE')
    plt.legend()
    plt.grid(True)
plot_loss(history)
In [137]:
mlp_8.evaluate(x_test, y_test)
9/9 [==============================] - 0s 2ms/step - loss: 1.0025
Out[137]:
1.0024641752243042
In [142]:
def actual_and_predicted_plot(orig, predict, var, model_name):    
    plt.figure(figsize=(17,5))
    plt.title(f'Тестовые и прогнозные значения: {model_name}')
    plt.plot(orig, label = 'Тест')
    plt.plot(predict, label = 'Прогноз')
    plt.legend(loc = 'best')
    plt.ylabel(var)
    plt.xlabel('Количество наблюдений')
    plt.show()
actual_and_predicted_plot(y_test.values, mlp_8.predict(x_test.values), 'Cоотношение матрица/наполнитель', 'mlp_10')
9/9 [==============================] - 0s 2ms/step
In [144]:
df_mlp_8 = error(mlp_8, x_train, x_test, y_train, y_test,
    name = 'mlp_8', trg = 'Соотношение матрица-наполнитель')
df_mlp_8
9/9 [==============================] - 0s 813us/step
9/9 [==============================] - 0s 2ms/step
21/21 [==============================] - 0s 779us/step
21/21 [==============================] - 0s 1ms/step
9/9 [==============================] - 0s 2ms/step
Out[144]:
model Target param MAE(test) MAE(train) MSE(test) MSE(train) R_Squared
0 mlp_8 Соотношение матрица-наполнитель 0.824951 0.65026 1.002464 0.657263 -0.311339
In [138]:
mlp_8.save('App/mlp_8/NEIRO_1')
WARNING:absl:Found untraced functions such as _update_step_xla while saving (showing 1 of 1). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: App/mlp_8/NEIRO_1\assets
INFO:tensorflow:Assets written to: App/mlp_8/NEIRO_1\assets
In [139]:
mlp_8_loaded = keras.models.load_model('App/mlp_8/NEIRO_1')
In [140]:
mlp_8_loaded.evaluate(x_test, y_test)
9/9 [==============================] - 0s 1ms/step - loss: 1.0025
Out[140]:
1.0024641752243042
In [141]:
print(tf.__version__)
2.11.0
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]: